[IA64] Remove warning messages
author: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Thu, 23 Mar 2006 20:22:56 +0000 (13:22 -0700)
committer: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Thu, 23 Mar 2006 20:22:56 +0000 (13:22 -0700)
This patch removes compiler warning messages in vmx_phy_mode.c
and vmx_virt.c.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
xen/arch/ia64/vmx/vmx_phy_mode.c
xen/arch/ia64/vmx/vmx_virt.c

index aadb7f0fcd89c9e1b40962c2d34648364d7bb669..48b394079677dad49b76bf2b58c6439a6c6640fa 100644 (file)
@@ -177,6 +177,7 @@ vmx_load_all_rr(VCPU *vcpu)
 {
        unsigned long psr;
        ia64_rr phy_rr;
+       extern void * pal_vaddr;
 
        local_irq_save(psr);
 
@@ -188,13 +189,13 @@ vmx_load_all_rr(VCPU *vcpu)
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic("Unexpected domain switch in phy emul\n");
                phy_rr.rrval = vcpu->arch.metaphysical_rr0;
- //    phy_rr.ps = PAGE_SHIFT;
-       phy_rr.ve = 1;
+//             phy_rr.ps = PAGE_SHIFT;
+               phy_rr.ve = 1;
 
                ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
                phy_rr.rrval = vcpu->arch.metaphysical_rr4;
-//     phy_rr.ps = PAGE_SHIFT;
-           phy_rr.ve = 1;
+//             phy_rr.ps = PAGE_SHIFT;
+               phy_rr.ve = 1;
 
                ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
        } else {
@@ -206,24 +207,24 @@ vmx_load_all_rr(VCPU *vcpu)
 
        /* rr567 will be postponed to last point when resuming back to guest */
        ia64_set_rr((VRN1 << VRN_SHIFT),
-                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
+                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
        ia64_set_rr((VRN2 << VRN_SHIFT),
-                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
+                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
        ia64_set_rr((VRN3 << VRN_SHIFT),
-                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
-    ia64_set_rr((VRN5 << VRN_SHIFT),
-            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
-    ia64_set_rr((VRN6 << VRN_SHIFT),
-            vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
-    extern void * pal_vaddr;
-    vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),(void *)vcpu->domain->shared_info,
-                (void *)vcpu->arch.privregs,
-                (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
-    ia64_set_pta(vcpu->arch.arch_vmx.mpta);
+                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
+       ia64_set_rr((VRN5 << VRN_SHIFT),
+                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
+       ia64_set_rr((VRN6 << VRN_SHIFT),
+                       vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
+       vmx_switch_rr7(vmx_vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
+                       (void *)vcpu->domain->shared_info,
+                       (void *)vcpu->arch.privregs,
+                       (void *)vcpu->arch.vtlb->vhpt->hash, pal_vaddr );
+       ia64_set_pta(vcpu->arch.arch_vmx.mpta);
 
        ia64_srlz_d();
        ia64_set_psr(psr);
-    ia64_srlz_i();
+       ia64_srlz_i();
 }
 
 void
index 20b324c77ea67294d1987813fe6c1da37081fa4a..48b64ec483384b399048f798af20488064c08f49 100644 (file)
@@ -714,6 +714,7 @@ IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
 IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
 {
     // I27 and M30 are identical for these fields
+    UINT64  imm;
     if(inst.M30.ar3!=44){
         panic("Can't support ar register other than itc");
     }
@@ -727,7 +728,6 @@ IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
         return IA64_FAULT;
     }
 #endif // CHECK_FAULT
-    UINT64  imm;
     if(inst.M30.s){
         imm = -inst.M30.imm;
     }else{
@@ -767,6 +767,7 @@ IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
 IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
 {
     // I27 and M30 are identical for these fields
+    u64 r1;
     if(inst.M31.ar3!=44){
         panic("Can't support ar register other than itc");
     }
@@ -785,7 +786,6 @@ IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
         return IA64_FAULT;
     }
 #endif // CHECK_FAULT
-    u64 r1;
     vmx_vcpu_get_itc(vcpu,&r1);
     vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
     return IA64_NO_FAULT;
@@ -844,8 +844,8 @@ IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
 
 IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
 {
-    return IA64_NO_FAULT;
     u64 r3,r2;
+    return IA64_NO_FAULT;
 #ifdef  CHECK_FAULT
     IA64_PSR vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -868,8 +868,8 @@ IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
 
 IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
 {
-    return IA64_NO_FAULT;
     u64 r3,r2;
+    return IA64_NO_FAULT;
 #ifdef  CHECK_FAULT
     IA64_PSR vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -1156,6 +1156,7 @@ IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
 {
     u64 r2;
+    extern u64 cr_igfld_mask(int index, u64 value);
 #ifdef  CHECK_FAULT
     IA64_PSR  vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -1187,7 +1188,6 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
         return IA64_FAULT;
     }
 #endif  //CHECK_FAULT
-    extern u64 cr_igfld_mask(int index, u64 value);
     r2 = cr_igfld_mask(inst.M32.cr3,r2);
     VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
     switch (inst.M32.cr3) {